Long Short-Term Memory network is a type of Recurrent Neural Network


In [1]:
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error


---------------------------------------------------------------------------
ImportError                               Traceback (most recent call last)
<ipython-input-1-ffc4e011e5bc> in <module>()
      3 import pandas
      4 import math
----> 5 from keras.models import Sequential
      6 from keras.layers import Dense
      7 from keras.layers import LSTM

ImportError: No module named keras.models

In [ ]:
# fix random seed for reproducibility
numpy.random.seed(7)

In [9]:
# load the dataset
dataframe = pandas.read_csv('tweetvolumes.csv', usecols=[1], engine='python')
dataset = dataframe.values
dataset = dataset.astype('float32')

In [10]:
dataframe.describe()


Out[10]:
Number of Tweets
count 171.000000
mean 1204.011696
std 625.290060
min 275.000000
25% 721.500000
50% 968.000000
75% 1641.500000
max 4278.000000

In [11]:
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)

In [12]:
# split into train and test sets - train set ends on May 12
train_size = 61
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size-10:len(dataset),:]
print(len(train), len(test))


(61, 120)

In [13]:
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back-1):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)

In [14]:
# reshape into X=t and Y=t+1
look_back = 7
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

In [15]:
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

In [16]:
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_dim=look_back))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, nb_epoch=100, batch_size=1, verbose=2)


Epoch 1/100
1s - loss: 0.1558
Epoch 2/100
1s - loss: 0.0597
Epoch 3/100
1s - loss: 0.0220
Epoch 4/100
1s - loss: 0.0099
Epoch 5/100
1s - loss: 0.0070
Epoch 6/100
1s - loss: 0.0065
Epoch 7/100
1s - loss: 0.0064
Epoch 8/100
1s - loss: 0.0065
Epoch 9/100
1s - loss: 0.0064
Epoch 10/100
1s - loss: 0.0064
Epoch 11/100
1s - loss: 0.0065
Epoch 12/100
1s - loss: 0.0064
Epoch 13/100
1s - loss: 0.0065
Epoch 14/100
1s - loss: 0.0065
Epoch 15/100
1s - loss: 0.0065
Epoch 16/100
1s - loss: 0.0063
Epoch 17/100
1s - loss: 0.0063
Epoch 18/100
1s - loss: 0.0064
Epoch 19/100
1s - loss: 0.0063
Epoch 20/100
1s - loss: 0.0063
Epoch 21/100
1s - loss: 0.0063
Epoch 22/100
1s - loss: 0.0063
Epoch 23/100
1s - loss: 0.0062
Epoch 24/100
1s - loss: 0.0063
Epoch 25/100
1s - loss: 0.0062
Epoch 26/100
1s - loss: 0.0063
Epoch 27/100
1s - loss: 0.0062
Epoch 28/100
1s - loss: 0.0062
Epoch 29/100
1s - loss: 0.0062
Epoch 30/100
1s - loss: 0.0063
Epoch 31/100
1s - loss: 0.0062
Epoch 32/100
1s - loss: 0.0061
Epoch 33/100
1s - loss: 0.0062
Epoch 34/100
1s - loss: 0.0061
Epoch 35/100
1s - loss: 0.0061
Epoch 36/100
1s - loss: 0.0062
Epoch 37/100
1s - loss: 0.0062
Epoch 38/100
1s - loss: 0.0061
Epoch 39/100
1s - loss: 0.0062
Epoch 40/100
1s - loss: 0.0060
Epoch 41/100
1s - loss: 0.0060
Epoch 42/100
1s - loss: 0.0060
Epoch 43/100
1s - loss: 0.0060
Epoch 44/100
1s - loss: 0.0060
Epoch 45/100
1s - loss: 0.0059
Epoch 46/100
1s - loss: 0.0059
Epoch 47/100
1s - loss: 0.0060
Epoch 48/100
1s - loss: 0.0060
Epoch 49/100
1s - loss: 0.0059
Epoch 50/100
1s - loss: 0.0060
Epoch 51/100
1s - loss: 0.0061
Epoch 52/100
1s - loss: 0.0060
Epoch 53/100
1s - loss: 0.0060
Epoch 54/100
1s - loss: 0.0058
Epoch 55/100
1s - loss: 0.0059
Epoch 56/100
1s - loss: 0.0058
Epoch 57/100
1s - loss: 0.0059
Epoch 58/100
1s - loss: 0.0059
Epoch 59/100
1s - loss: 0.0060
Epoch 60/100
1s - loss: 0.0058
Epoch 61/100
1s - loss: 0.0058
Epoch 62/100
1s - loss: 0.0058
Epoch 63/100
1s - loss: 0.0058
Epoch 64/100
1s - loss: 0.0059
Epoch 65/100
1s - loss: 0.0059
Epoch 66/100
1s - loss: 0.0061
Epoch 67/100
1s - loss: 0.0058
Epoch 68/100
1s - loss: 0.0058
Epoch 69/100
1s - loss: 0.0059
Epoch 70/100
1s - loss: 0.0059
Epoch 71/100
1s - loss: 0.0059
Epoch 72/100
1s - loss: 0.0058
Epoch 73/100
1s - loss: 0.0059
Epoch 74/100
1s - loss: 0.0058
Epoch 75/100
1s - loss: 0.0057
Epoch 76/100
1s - loss: 0.0057
Epoch 77/100
1s - loss: 0.0057
Epoch 78/100
1s - loss: 0.0057
Epoch 79/100
1s - loss: 0.0057
Epoch 80/100
1s - loss: 0.0057
Epoch 81/100
1s - loss: 0.0057
Epoch 82/100
1s - loss: 0.0057
Epoch 83/100
1s - loss: 0.0056
Epoch 84/100
1s - loss: 0.0057
Epoch 85/100
1s - loss: 0.0057
Epoch 86/100
1s - loss: 0.0057
Epoch 87/100
1s - loss: 0.0057
Epoch 88/100
1s - loss: 0.0057
Epoch 89/100
1s - loss: 0.0057
Epoch 90/100
1s - loss: 0.0057
Epoch 91/100
1s - loss: 0.0057
Epoch 92/100
1s - loss: 0.0056
Epoch 93/100
1s - loss: 0.0056
Epoch 94/100
1s - loss: 0.0056
Epoch 95/100
1s - loss: 0.0056
Epoch 96/100
1s - loss: 0.0058
Epoch 97/100
1s - loss: 0.0057
Epoch 98/100
1s - loss: 0.0057
Epoch 99/100
1s - loss: 0.0058
Epoch 100/100
1s - loss: 0.0056
Out[16]:
<keras.callbacks.History at 0x128718d0>

In [17]:
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))


Train Score: 296.41 RMSE
Test Score: 509.65 RMSE

In [20]:
testPredict


Out[20]:
array([[ 1981.43615723],
       [ 1992.1418457 ],
       [ 2100.40600586],
       [ 2168.33374023],
       [ 2473.52172852],
       [ 2310.02392578],
       [ 2464.37451172],
       [ 1949.58654785],
       [ 1885.75598145],
       [ 1664.08544922],
       [ 1750.41748047],
       [ 1725.79052734],
       [ 1537.6270752 ],
       [ 1409.41516113],
       [ 1293.01904297],
       [ 1386.03112793],
       [ 1284.80944824],
       [ 1394.8918457 ],
       [ 1273.22619629],
       [ 1293.39733887],
       [ 1211.71679688],
       [ 1399.80554199],
       [ 1239.98327637],
       [ 1392.62475586],
       [ 1276.40551758],
       [ 1436.38439941],
       [ 1433.35852051],
       [ 1471.65856934],
       [ 1395.31591797],
       [ 1387.54345703],
       [ 1367.40905762],
       [ 1359.68640137],
       [ 1272.97888184],
       [ 1320.84277344],
       [ 1278.33850098],
       [ 1317.51367188],
       [ 1216.58496094],
       [ 1240.67443848],
       [ 1225.46899414],
       [ 1263.38220215],
       [ 1240.70080566],
       [ 1218.64953613],
       [ 1218.2623291 ],
       [ 1220.91589355],
       [ 1277.83398438],
       [ 1223.87854004],
       [ 1258.18212891],
       [ 1149.62646484],
       [ 1261.05383301],
       [ 1072.4864502 ],
       [ 1182.48168945],
       [ 1100.25366211],
       [ 1140.48474121],
       [ 1149.3972168 ],
       [ 1124.09191895],
       [ 1145.74353027],
       [ 1091.45532227],
       [ 1141.44592285],
       [ 1059.48547363],
       [ 1109.9720459 ],
       [ 1130.94372559],
       [ 1089.66149902],
       [ 1168.2298584 ],
       [ 1074.25256348],
       [ 1164.63891602],
       [ 1130.89611816],
       [ 1160.32019043],
       [ 1105.93896484],
       [ 1079.69909668],
       [ 1092.48803711],
       [ 1078.41149902],
       [ 1128.30493164],
       [ 1067.12768555],
       [ 1108.90112305],
       [ 1129.04272461],
       [ 1146.38122559],
       [ 1158.16027832],
       [ 1137.99108887],
       [ 1143.46838379],
       [ 1134.16503906],
       [ 1125.65808105],
       [ 1098.08581543],
       [ 1029.3894043 ],
       [ 1063.16723633],
       [ 1028.58959961],
       [ 1159.41296387],
       [ 1081.92871094],
       [ 1132.70556641],
       [ 1123.35046387],
       [ 1138.15319824],
       [ 1221.36816406],
       [ 1196.01708984],
       [ 1211.36743164],
       [ 1191.27746582],
       [ 1216.61047363],
       [ 1175.83874512],
       [ 1182.96337891],
       [ 1162.1965332 ],
       [ 1148.82177734],
       [ 1180.43505859],
       [ 1115.88793945],
       [ 1169.18884277],
       [ 1104.86975098],
       [ 1098.11755371],
       [ 1091.28186035],
       [ 1092.62695312],
       [ 1153.06286621],
       [ 1128.99645996],
       [ 1117.98864746],
       [ 1095.4666748 ],
       [ 1075.85559082],
       [ 1129.02392578]], dtype=float32)

In [18]:
# shift train predictions for plotting
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict-10)+(look_back)+1:len(dataset)-1, :] = testPredict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-18-00dfc86a62fb> in <module>()
      6 testPredictPlot = numpy.empty_like(dataset)
      7 testPredictPlot[:, :] = numpy.nan
----> 8 testPredictPlot[len(trainPredict-10)+(look_back)+1:len(dataset)-1, :] = testPredict
      9 # plot baseline and predictions
     10 plt.plot(scaler.inverse_transform(dataset))

ValueError: could not broadcast input array from shape (112,1) into shape (109,1)

lookback = 1, hidden neurons = 4 Train Score: 398.36 RMSE Test Score: 314.96 RMSE

lookback = 3, hidden neurons = 4 Train Score: 387.69 RMSE Test Score: 214.87 RMSE

lookback = 7, hidden neurons = 4 Train Score: 357.99 RMSE Test Score: 146.85 RMSE

lookback = 10, hidden neurons = 8 Train Score: 366.99 RMSE Test Score: 149.24 RMSE


In [ ]: